#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#include <asm-xen/xen_proc.h>
-#else
-#include <asm/xen_proc.h>
-#endif
-
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include <asm-xen/xen_proc.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/ctrl_if.h>
-#else
-#include <asm/hypervisor.h>
-#include <asm/ctrl_if.h>
-#endif
-
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
-
#include <linux/list.h>
/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
#define PAGE_TO_LIST(p) ( &p->list )
#define LIST_TO_PAGE(l) ( list_entry(l, struct page, list) )
#define UNLIST_PAGE(p) ( list_del(&p->list) )
+#define pte_offset_kernel pte_offset
#endif
/* List of ballooned pages, threaded through the mem_map array. */
pmd = pmd_offset(pgd, addr);
if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
ptep = pte_offset_kernel(pmd, addr);
-#else
- ptep = pte_offset(pmd, addr);
-#endif
return ptep;
}
}
kpgd = pgd_offset_k((unsigned long)pte_base);
kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
kpte = pte_offset_kernel(kpmd, (unsigned long)pte_base);
-#else
- kpte = pte_offset(kpmd, (unsigned long)pte_base);
-#endif
queue_l1_entry_update(kpte,
(*(unsigned long *)kpte)&~_PAGE_RW);
set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
if (len>count) len = count;
if (len<0) len = 0;
- copy_to_user(buffer, priv_bufp, len);
+ if ( copy_to_user(buffer, priv_bufp, len) != 0 )
+ return -EFAULT;
*offp += len;
return len;
.read = balloon_read,
.write = balloon_write
};
-#else
+#else
static int balloon_write(struct file *file, const char *buffer,
u_long count, void *data)
}
}
-
return len;
}
return len;
}
-
-
#endif
-
static int __init balloon_init(void)
{
printk(KERN_ALERT "Starting Xen Balloon driver\n");
#if 1
#define dprintf(fmt, args...) \
-printk(KERN_ALERT "[XEN:%s:%s:%d] " fmt, __FUNCTION__, __FILE__, __LINE__, ##args)
+printk(KERN_ALERT "[XEN:%s:%s:%d] " fmt, \
+__FUNCTION__, __FILE__, __LINE__, ##args)
#endif
-#define WPRINTK(fmt, args...) printk(KERN_WARNING "[XEN] " fmt, ##args)
+#define WPRINTK(fmt, args...) printk(KERN_WARNING "xen_blk: " fmt, ##args)
static int blkif_handle = 0;
static unsigned int blkif_state = BLKIF_STATE_CLOSED;
#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
(blkif_state != BLKIF_STATE_CONNECTED))
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
- blkif_request_t *req);
-
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
- blkif_request_t *req);
-
-static inline void flush_requests(void);
-
static void kick_pending_request_queues(void);
int __init xlblk_init(void);
}
-/************************** KERNEL VERSION 2.6 **************************/
+/************************ COMMON CODE (inlined) ************************/
+/* Kernel-specific definitions used in the common code */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
#define DISABLE_SCATTERGATHER()
+#else
+#define DISABLE_SCATTERGATHER() (sg_operation = -1)
+#endif
+
+/*
+ * Copy a block-interface request while converting each frame number
+ * from machine frame number (MFN) to pseudo-physical frame number (PFN)
+ * via machine_to_phys().
+ * NOTE: xreq->id is deliberately NOT copied -- the id already present
+ * in the destination entry is preserved (contrast translate_req_to_mfn).
+ */
+static inline void translate_req_to_pfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
+{
+    int i;
+
+    xreq->operation     = req->operation;
+    xreq->nr_segments   = req->nr_segments;
+    xreq->device        = req->device;
+    /* preserve id */
+    xreq->sector_number = req->sector_number;
+
+    for ( i = 0; i < req->nr_segments; i++ )
+        xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+}
+
+/*
+ * Copy a block-interface request while converting each frame number
+ * from pseudo-physical frame number (PFN) to machine frame number (MFN)
+ * via phys_to_machine(). Unlike translate_req_to_pfn(), the request id
+ * IS copied across.
+ */
+static inline void translate_req_to_mfn(blkif_request_t *xreq,
+                                        blkif_request_t *req)
+{
+    int i;
+
+    xreq->operation     = req->operation;
+    xreq->nr_segments   = req->nr_segments;
+    xreq->device        = req->device;
+    xreq->id            = req->id; /* copy id (unlike above) */
+    xreq->sector_number = req->sector_number;
+
+    for ( i = 0; i < req->nr_segments; i++ )
+        xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+}
+
+
+/*
+ * Publish all queued requests on the shared ring and notify the backend
+ * via the event channel. The write barrier orders the ring-slot writes
+ * before the req_prod update so the backend never sees a stale request.
+ */
+static inline void flush_requests(void)
+{
+    DISABLE_SCATTERGATHER();
+    wmb(); /* Ensure that the backend can see the requests. */
+    blk_ring->req_prod = req_prod;
+    notify_via_evtchn(blkif_evtchn);
+}
+
+
+
+
+/************************** KERNEL VERSION 2.6 **************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
__initcall(xlblk_init);
static int nr_pending;
-#define DISABLE_SCATTERGATHER() (sg_operation = -1)
-
#define blkif_io_lock io_request_lock
/*============================================================================*/
/***************************** COMMON CODE *******************************/
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
- blkif_request_t *req)
-{
- int i;
-
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- /* preserve id */
- xreq->sector_number = req->sector_number;
-
- for ( i = 0; i < req->nr_segments; i++ ){
- xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
- }
-}
-
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
- blkif_request_t *req)
-{
- int i;
-
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- xreq->id = req->id; /* copy id (unlike above) */
- xreq->sector_number = req->sector_number;
-
- for ( i = 0; i < req->nr_segments; i++ ){
- xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
- }
-}
-
-
-
-static inline void flush_requests(void)
-{
- DISABLE_SCATTERGATHER();
- wmb(); /* Ensure that the frontend can see the requests. */
- blk_ring->req_prod = req_prod;
- notify_via_evtchn(blkif_evtchn);
-}
-
-
void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
{
unsigned long flags, id;
/* Send a driver status notification to the domain controller. */
-static void send_driver_status(int ok){
+static void send_driver_status(int ok)
+{
ctrl_msg_t cmsg = {
.type = CMSG_BLKIF_FE,
.subtype = CMSG_BLKIF_FE_DRIVER_STATUS,
}
/* Tell the controller to bring up the interface. */
-static void blkif_send_interface_connect(void){
+static void blkif_send_interface_connect(void)
+{
ctrl_msg_t cmsg = {
.type = CMSG_BLKIF_FE,
.subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
static void blkif_free(void)
{
+ printk(KERN_INFO "xen_blk: Recovering virtual block device driver\n");
- printk(KERN_INFO "[XEN] Recovering virtual block device driver\n");
-
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&blkif_io_lock);
recovery = 1;
spin_unlock_irq(&blkif_io_lock);
/* Free resources associated with old device channel. */
- if(blk_ring){
+ if ( blk_ring != NULL )
+ {
free_page((unsigned long)blk_ring);
- blk_ring = 0;
+ blk_ring = NULL;
}
free_irq(blkif_irq, NULL);
blkif_irq = 0;
blkif_evtchn = 0;
}
-static void blkif_close(void){
+static void blkif_close(void)
+{
}
/* Move from CLOSED to DISCONNECTED state. */
static void blkif_reset(void)
{
- printk(KERN_INFO "[XEN] Recovering virtual block device driver\n");
+ printk(KERN_INFO "xen_blk: Recovering virtual block device driver\n");
blkif_free();
blkif_disconnect();
}
static void blkif_recover(void)
{
-
int i;
/* Hmm, requests might be re-ordered when we re-issue them.
* This will need to be fixed once we have barriers */
/* Stage 1 : Find active and move to safety. */
- for ( i = 0; i < BLKIF_RING_SIZE; i++ ) {
- if ( rec_ring[i].id >= PAGE_OFFSET ) {
+ for ( i = 0; i < BLKIF_RING_SIZE; i++ )
+ {
+ if ( rec_ring[i].id >= PAGE_OFFSET )
+ {
translate_req_to_mfn(
&blk_ring->ring[req_prod].req, &rec_ring[i]);
req_prod++;
printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
/* Stage 2 : Set up shadow list. */
- for ( i = 0; i < req_prod; i++ ) {
+ for ( i = 0; i < req_prod; i++ )
+ {
rec_ring[i].id = blk_ring->ring[i].req.id;
blk_ring->ring[i].req.id = i;
translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
}
/* Stage 3 : Set up free list. */
- for ( ; i < BLKIF_RING_SIZE; i++ ){
+ for ( ; i < BLKIF_RING_SIZE; i++ )
rec_ring[i].id = i+1;
- }
rec_ring_free = req_prod;
rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
- if(err){
- printk(KERN_ALERT "[XEN] blkfront request_irq failed (err=%d)\n", err);
+ if ( err )
+ {
+ printk(KERN_ALERT "xen_blk: request_irq failed (err=%d)\n", err);
return;
}
- if ( recovery ) {
+ if ( recovery )
+ {
blkif_recover();
- } else {
+ }
+ else
+ {
/* Transition to connected in case we need to do
* a partition probe on a whole disk. */
blkif_state = BLKIF_STATE_CONNECTED;
static void blkif_status(blkif_fe_interface_status_t *status)
{
- if (status->handle != blkif_handle) {
+ if ( status->handle != blkif_handle )
+ {
WPRINTK(" Invalid blkif: handle=%u", status->handle);
return;
}
- switch (status->status) {
-
+ switch ( status->status )
+ {
case BLKIF_INTERFACE_STATUS_CLOSED:
- switch(blkif_state){
+ switch ( blkif_state )
+ {
case BLKIF_STATE_CLOSED:
unexpected(status);
break;
break;
case BLKIF_INTERFACE_STATUS_DISCONNECTED:
- switch(blkif_state){
+ switch ( blkif_state )
+ {
case BLKIF_STATE_CLOSED:
blkif_disconnect();
break;
break;
case BLKIF_INTERFACE_STATUS_CONNECTED:
- switch(blkif_state){
+ switch ( blkif_state )
+ {
case BLKIF_STATE_CLOSED:
unexpected(status);
blkif_disconnect();
break;
case BLKIF_INTERFACE_STATUS_CHANGED:
- switch(blkif_state){
+ switch ( blkif_state )
+ {
case BLKIF_STATE_CLOSED:
case BLKIF_STATE_DISCONNECTED:
unexpected(status);
ctrl_if_send_response(msg);
}
-int wait_for_blkif(void){
+int wait_for_blkif(void)
+{
int err = 0;
int i;
send_driver_status(1);
schedule_timeout(1);
}
- if (blkif_state != BLKIF_STATE_CONNECTED){
- printk(KERN_INFO "[XEN] Timeout connecting block device driver!\n");
+ if ( blkif_state != BLKIF_STATE_CONNECTED )
+ {
+ printk(KERN_INFO "xen_blk: Timeout connecting to device!\n");
err = -ENOSYS;
}
return err;
{
int i;
- if ( (xen_start_info.flags & SIF_INITDOMAIN)
- || (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+ if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
+ (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
return 0;
- printk(KERN_INFO "[XEN] Initialising virtual block device driver\n");
+ printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");
rec_ring_free = 0;
- for (i=0; i<BLKIF_RING_SIZE; i++)
- {
+ for ( i = 0; i < BLKIF_RING_SIZE; i++ )
rec_ring[i].id = i+1;
- }
rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
(void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,